From 64f12e8f09934534180b05328c9408a49055acf0 Mon Sep 17 00:00:00 2001 From: Wei Huang Date: Mon, 9 May 2011 11:40:42 +0100 Subject: [PATCH] x86/LWP: Add LWP support for SVM guests This patch enables SVM to handle LWP related MSRs and CPUID. It intercepts guests read/write to LWP_CFG. It also save/restore LWP_CFG when guests touch this MSR. The LWP_CBADDR MSR is not intercepted because this MSR is handled by xsave/xrstor. Signed-off-by: Wei Huang --- xen/arch/x86/hvm/svm/svm.c | 76 +++++++++++++++++++++++++++++- xen/arch/x86/hvm/svm/vmcb.c | 5 ++ xen/include/asm-x86/cpufeature.h | 2 + xen/include/asm-x86/hvm/svm/vmcb.h | 3 ++ xen/include/asm-x86/msr-index.h | 4 ++ 5 files changed, 89 insertions(+), 1 deletion(-) diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index 197fa2c3a8..f5591f779c 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -58,7 +58,8 @@ #include #include #include -#include +#include +#include u32 svm_feature_flags; @@ -695,6 +696,50 @@ static void svm_init_hypercall_page(struct domain *d, void *hypercall_page) *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */ } +static inline void svm_lwp_save(struct vcpu *v) +{ + /* Don't mess up with other guests. Disable LWP for next VCPU. */ + if ( v->arch.hvm_svm.guest_lwp_cfg ) + { + wrmsrl(MSR_AMD64_LWP_CFG, 0x0); + wrmsrl(MSR_AMD64_LWP_CBADDR, 0x0); + } +} + +static inline void svm_lwp_load(struct vcpu *v) +{ + /* Only LWP_CFG is reloaded. LWP_CBADDR will be reloaded via xrstor. */ + if ( v->arch.hvm_svm.guest_lwp_cfg ) + wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.guest_lwp_cfg); +} + +/* Update LWP_CFG MSR (0xc0000105). Return -1 if error; otherwise returns 0. 
*/ +static int svm_update_lwp_cfg(struct vcpu *v, uint64_t msr_content) +{ + unsigned int eax, ebx, ecx, edx; + uint32_t msr_low; + + if ( xsave_enabled(v) && cpu_has_lwp ) + { + hvm_cpuid(0x8000001c, &eax, &ebx, &ecx, &edx); + msr_low = (uint32_t)msr_content; + + /* generate #GP if guest tries to turn on unsupported features. */ + if ( msr_low & ~edx) + return -1; + + wrmsrl(MSR_AMD64_LWP_CFG, msr_content); + /* CPU might automatically correct reserved bits. So read it back. */ + rdmsrl(MSR_AMD64_LWP_CFG, msr_content); + v->arch.hvm_svm.guest_lwp_cfg = msr_content; + + /* track nonlazy state if LWP_CFG is non-zero. */ + v->arch.nonlazy_xstate_used = !!(msr_content); + } + + return 0; +} + static void svm_ctxt_switch_from(struct vcpu *v) { int cpu = smp_processor_id(); @@ -703,6 +748,7 @@ static void svm_ctxt_switch_from(struct vcpu *v) svm_save_dr(v); vpmu_save(v); + svm_lwp_save(v); svm_sync_vmcb(v); svm_vmload(per_cpu(root_vmcb, cpu)); @@ -746,6 +792,7 @@ static void svm_ctxt_switch_to(struct vcpu *v) svm_vmload(vmcb); vmcb->cleanbits.bytes = 0; vpmu_load(v); + svm_lwp_load(v); if ( cpu_has_rdtscp ) wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v)); @@ -1120,6 +1167,24 @@ static void svm_cpuid_intercept( if ( vlapic_hw_disabled(vcpu_vlapic(v)) ) __clear_bit(X86_FEATURE_APIC & 31, edx); break; + case 0x8000001c: + { + /* LWP capability CPUID */ + uint64_t lwp_cfg = v->arch.hvm_svm.guest_lwp_cfg; + + if ( cpu_has_lwp ) + { + if ( !(v->arch.xcr0 & XSTATE_LWP) ) + { + *eax = 0x0; + break; + } + + /* turn on available bit and other features specified in lwp_cfg */ + *eax = (*edx & lwp_cfg) | 0x00000001; + } + break; + } default: break; } @@ -1227,6 +1292,10 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content) *msr_content = vmcb_get_lastinttoip(vmcb); break; + case MSR_AMD64_LWP_CFG: + *msr_content = v->arch.hvm_svm.guest_lwp_cfg; + break; + case MSR_K7_PERFCTR0: case MSR_K7_PERFCTR1: case MSR_K7_PERFCTR2: @@ -1337,6 +1406,11 @@ static int 
svm_msr_write_intercept(unsigned int msr, uint64_t msr_content) vmcb_set_lastinttoip(vmcb, msr_content); break; + case MSR_AMD64_LWP_CFG: + if ( svm_update_lwp_cfg(v, msr_content) < 0 ) + goto gpf; + break; + case MSR_K7_PERFCTR0: case MSR_K7_PERFCTR1: case MSR_K7_PERFCTR2: diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c index 1a7c6443df..f23ee9aba6 100644 --- a/xen/arch/x86/hvm/svm/vmcb.c +++ b/xen/arch/x86/hvm/svm/vmcb.c @@ -121,6 +121,11 @@ static int construct_vmcb(struct vcpu *v) svm_disable_intercept_for_msr(v, MSR_STAR); svm_disable_intercept_for_msr(v, MSR_SYSCALL_MASK); + /* LWP_CBADDR MSR is saved and restored by FPU code. So SVM doesn't need to + * intercept it. */ + if ( cpu_has_lwp ) + svm_disable_intercept_for_msr(v, MSR_AMD64_LWP_CBADDR); + vmcb->_msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm); vmcb->_iopm_base_pa = (u64)virt_to_maddr(hvm_io_bitmap); diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h index b172164768..9ed1e97c8e 100644 --- a/xen/include/asm-x86/cpufeature.h +++ b/xen/include/asm-x86/cpufeature.h @@ -208,6 +208,8 @@ #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) +#define cpu_has_lwp boot_cpu_has(X86_FEATURE_LWP) + #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) #define cpu_has_rdtscp boot_cpu_has(X86_FEATURE_RDTSCP) diff --git a/xen/include/asm-x86/hvm/svm/vmcb.h b/xen/include/asm-x86/hvm/svm/vmcb.h index eecec70ce7..9337ebaf36 100644 --- a/xen/include/asm-x86/hvm/svm/vmcb.h +++ b/xen/include/asm-x86/hvm/svm/vmcb.h @@ -512,6 +512,9 @@ struct arch_svm_struct { uint64_t guest_sysenter_cs; uint64_t guest_sysenter_esp; uint64_t guest_sysenter_eip; + + /* AMD lightweight profiling MSR */ + uint64_t guest_lwp_cfg; }; struct vmcb_struct *alloc_vmcb(void); diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h index 454696d6aa..8560fb3d63 100644 --- a/xen/include/asm-x86/msr-index.h +++ b/xen/include/asm-x86/msr-index.h @@ -266,6 
+266,10 @@ #define MSR_AMD_PATCHLEVEL 0x0000008b #define MSR_AMD_PATCHLOADER 0xc0010020 +/* AMD Lightweight Profiling MSRs */ +#define MSR_AMD64_LWP_CFG 0xc0000105 +#define MSR_AMD64_LWP_CBADDR 0xc0000106 + /* AMD OS Visible Workaround MSRs */ #define MSR_AMD_OSVW_ID_LENGTH 0xc0010140 #define MSR_AMD_OSVW_STATUS 0xc0010141 -- 2.30.2